Make phys_to_machine_mapping a static inline function, so that callers get
compile-time type checking of the pfn argument instead of raw array indexing
through a macro.
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
return;
}
- host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+ host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
if (__put_user(spte, (unsigned long *)
if (d->arch.shadow_mode == SHM_full_32) {
- guest_gpfn = phys_to_machine_mapping[gpfn];
+ guest_gpfn = phys_to_machine_mapping(gpfn);
if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) )
FAIL("spfn problem g.sf=%08lx",
if (d->arch.shadow_mode == SHM_full_32)
{
- host_gpfn = phys_to_machine_mapping[gpfn];
+ host_gpfn = phys_to_machine_mapping(gpfn);
gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
} else
index = (va >> L2_PAGETABLE_SHIFT);
if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
- pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
+ pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
pagetable_val(ed->arch.pagetable));
__guest_get_pl2e(ed, gva, &gpde);
index = (gva >> L2_PAGETABLE_SHIFT);
- pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
+ pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
ed->arch.guest_pl2e_cache[index] =
mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
/*
* The guest CR3 must be pointing to the guest physical.
*/
- if (!(pfn = phys_to_machine_mapping[
- d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT]))
+ if (!(pfn = phys_to_machine_mapping(
+ d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)))
{
VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n",
d->arch.arch_vmx.cpu_cr3);
* removed some translation or changed page attributes.
* We simply invalidate the shadow.
*/
- pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+ pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable))
__vmx_bug(regs);
vmx_shadow_clear_state(d->domain);
"Invalid CR3 value=%lx\n", value);
domain_crash(); /* need to take a clean path */
}
- pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+ pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
vmx_shadow_clear_state(d->domain);
d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
shadow_mk_pagetable(d);
printk("inst_copy_from_guest- EXIT: read gpte faulted" );
return 0;
}
- mfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+ mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
inst_start = (unsigned char *)map_domain_mem(ma);
addr = context->edi;
offset = (addr & ~PAGE_MASK);
addr = round_pgdown(addr);
- mpfn = phys_to_machine_mapping[addr >> PAGE_SHIFT];
+ mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
p = map_domain_mem(mpfn << PAGE_SHIFT);
e820p = (struct e820entry *) ((unsigned long) p + offset);
}
unmap_domain_mem(p);
- mpfn = phys_to_machine_mapping[gpfn];
+ mpfn = phys_to_machine_mapping(gpfn);
p = map_domain_mem(mpfn << PAGE_SHIFT);
d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;
d->domain->arch.mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] =
mk_l1_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
}
- phys_to_machine_mapping[gpfn] = mpfn;
+ __phys_to_machine_mapping[gpfn] = mpfn;
return 0;
}
#undef phys_to_machine_mapping
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
-#define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
-
+#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
+/* Returns the machine frame number (mfn) that backs the given guest pfn */
+static inline unsigned long phys_to_machine_mapping(unsigned long pfn)
+{
+ return __phys_to_machine_mapping[pfn];
+}
#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
#define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
#define __get_phys_to_machine(_d, host_gpfn, gpfn) \
if ((_d)->arch.shadow_mode == SHM_full_32) \
- (host_gpfn) = phys_to_machine_mapping[(gpfn)]; \
+ (host_gpfn) = phys_to_machine_mapping(gpfn); \
else \
(host_gpfn) = (gpfn);
{
unsigned long pfn;
- pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+ pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
{
unsigned long host_pfn, host_gpte;
- host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+ host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
spte = host_gpte | _PAGE_RW;
}
{
unsigned long host_pfn, host_gpte;
- host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+ host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
}
return;
}
- host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+ host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==